Traditional feedforward neural network to approximate a black-box function

This is just a toy example to test the basic functionality of Bokeh's interactive plots!


In [106]:
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.optim as optim

import numpy as np
import matplotlib

from matplotlib import pyplot as plt


from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, output_notebook, ColumnDataSource
from bokeh.layouts import column, row, widgetbox
from bokeh.models import CustomJS, Slider, Select


output_notebook()

%matplotlib inline


Loading BokehJS ...

In [3]:
def fx(x):
    return np.random.normal(0, 5) + np.log(x)*np.sin(x/2)
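
For reference, the black-box target being sampled here is

$$ f(x) = \varepsilon + \log(x)\,\sin\!\left(\tfrac{x}{2}\right), \qquad \varepsilon \sim \mathcal{N}(0,\, 5^2), $$

where `np.random.normal(0, 5)` draws a single scalar per call, so each evaluation of fx shifts the whole curve by one random offset rather than adding independent noise at every x.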

In [4]:
data__x = np.arange(1,100,1)
data__y = fx(data__x)

In [5]:
TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"

p1 = figure(title="my ultimate function!", tools=TOOLS)
p1.line(data__x, data__y, legend="random graph")

p1.width = 1200
show(p1)



In [49]:
# try to approximate it with a regular feedforward network

In [103]:
gpu_dtype = torch.cuda.FloatTensor 
print_every = 2500


# class ffNet(nn.Module):
#     def __init__(self):
#         super(ffNet, self).__init__()
#         self.fc1 = nn.Linear(1, 64)
#         self.relu = nn.ReLU()
#         self.fc2 = nn.Linear(64, 1)
        
#     def forward(self, x):
#         out = self.relu(self.fc1(x))
#         out = self.fc2(out)
#         return out
    
#   a more concise way to define the same kind of network

ffNet = nn.Sequential( 
    nn.Linear(1, 1024),
    nn.ReLU(inplace=True),
    nn.Linear(1024, 1024),
    nn.ReLU(inplace=True),
    nn.Linear(1024, 256),
    nn.ReLU(inplace=True),
    nn.Linear(256, 1)
)
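
The cells above and below assume a CUDA GPU is available (gpu_dtype is torch.cuda.FloatTensor). As a minimal sketch, not part of the original run, the dtype could fall back to CPU tensors when CUDA is missing, and the size of ffNet can be checked with a quick parameter count:

import torch

# sketch: fall back to CPU tensors when CUDA is unavailable
gpu_dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

# count the trainable parameters of the 1-1024-1024-256-1 stack (roughly 1.31M)
num_params = sum(p.numel() for p in ffNet.parameters())
print('trainable parameters: %d' % num_params)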

In [97]:
def train(data, model, loss_fn, optimizer, save_every_epoch=1000, num_epochs=2):
    model.train()
    
    history = {}
    
    xs = torch.from_numpy(data['xs']).unsqueeze(1)
    ys = torch.from_numpy(data['ys']).unsqueeze(1)
    N = len(ys)
    for epoch in range(num_epochs):
        x_var = Variable(xs.type(gpu_dtype))
        y_var = Variable(ys.type(gpu_dtype))
        
        scores = model(x_var)
        
        # periodically snapshot the predictions so training can be replayed later
        if (epoch + 1) % save_every_epoch == 0:
            history[str(epoch+1)] = scores
        
        loss = loss_fn(scores, y_var)
        if (epoch + 1) % print_every == 0:
            print('epoch = %d, loss = %.4f' % (epoch + 1, loss.data[0]))
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
    return history

In [98]:
model = ffNet.type(gpu_dtype)

loss_fn = nn.MSELoss().type(gpu_dtype)
optimizer = optim.Adam(model.parameters(), lr=1e-4)

xs = np.arange(1,100,0.1)
ys = fx(xs)

data =  {}
data['xs'] = xs
data['ys'] = ys

history_of_training = train(data, model, loss_fn, optimizer, num_epochs=150000)


epoch = 500, loss = 4.2065
epoch = 1000, loss = 4.0514
epoch = 1500, loss = 3.9321
epoch = 2000, loss = 3.8473
epoch = 2500, loss = 3.7203
epoch = 3000, loss = 3.6676
epoch = 3500, loss = 3.5577
epoch = 4000, loss = 3.4725
epoch = 4500, loss = 3.5264
epoch = 5000, loss = 3.3610
epoch = 5500, loss = 3.1831
epoch = 6000, loss = 3.1063
epoch = 6500, loss = 3.2245
epoch = 7000, loss = 3.1172
epoch = 7500, loss = 2.8339
epoch = 8000, loss = 3.0797
epoch = 8500, loss = 2.6550
epoch = 9000, loss = 3.0002
epoch = 9500, loss = 2.4694
epoch = 10000, loss = 2.3889
epoch = 10500, loss = 2.2978
epoch = 11000, loss = 2.9131
epoch = 11500, loss = 2.3735
epoch = 12000, loss = 2.0840
epoch = 12500, loss = 2.2256
epoch = 13000, loss = 1.9658
epoch = 13500, loss = 1.8407
epoch = 14000, loss = 1.7629
epoch = 14500, loss = 1.7925
epoch = 15000, loss = 1.6149
epoch = 15500, loss = 1.4879
epoch = 16000, loss = 1.7192
epoch = 16500, loss = 1.4376
epoch = 17000, loss = 1.2416
epoch = 17500, loss = 1.1595
epoch = 18000, loss = 1.2963
epoch = 18500, loss = 1.1328
epoch = 19000, loss = 1.0801
epoch = 19500, loss = 0.9001
epoch = 20000, loss = 1.3833
epoch = 20500, loss = 0.9198
epoch = 21000, loss = 0.7751
epoch = 21500, loss = 1.2914
epoch = 22000, loss = 0.8480
epoch = 22500, loss = 0.7552
epoch = 23000, loss = 0.7460
epoch = 23500, loss = 0.5753
epoch = 24000, loss = 0.5789
epoch = 24500, loss = 0.6598
epoch = 25000, loss = 0.3960
epoch = 25500, loss = 0.3523
epoch = 26000, loss = 0.4780
epoch = 26500, loss = 0.5133
epoch = 27000, loss = 0.5758
epoch = 27500, loss = 0.5305
epoch = 28000, loss = 0.7296
epoch = 28500, loss = 0.6026
epoch = 29000, loss = 0.2554
epoch = 29500, loss = 0.7642
epoch = 30000, loss = 0.3067
epoch = 30500, loss = 0.1999
epoch = 31000, loss = 0.7462
epoch = 31500, loss = 0.4343
epoch = 32000, loss = 0.3805
epoch = 32500, loss = 0.2791
epoch = 33000, loss = 0.1793
epoch = 33500, loss = 0.3254
epoch = 34000, loss = 0.2528
epoch = 34500, loss = 0.1505
epoch = 35000, loss = 0.1767
epoch = 35500, loss = 0.1595
epoch = 36000, loss = 0.2076
epoch = 36500, loss = 0.3516
epoch = 37000, loss = 0.2329
epoch = 37500, loss = 0.2339
epoch = 38000, loss = 0.2779
epoch = 38500, loss = 0.2093
epoch = 39000, loss = 0.2267
epoch = 39500, loss = 0.2981
epoch = 40000, loss = 0.1264
epoch = 40500, loss = 0.2213
epoch = 41000, loss = 0.2837
epoch = 41500, loss = 0.2608
epoch = 42000, loss = 0.4422
epoch = 42500, loss = 0.1218
epoch = 43000, loss = 0.1225
epoch = 43500, loss = 0.1817
epoch = 44000, loss = 0.2644
epoch = 44500, loss = 0.1466
epoch = 45000, loss = 0.1974
epoch = 45500, loss = 0.1618
epoch = 46000, loss = 0.2443
epoch = 46500, loss = 0.2388
epoch = 47000, loss = 0.1580
epoch = 47500, loss = 0.2404
epoch = 48000, loss = 0.1322
epoch = 48500, loss = 0.2841
epoch = 49000, loss = 0.3962
epoch = 49500, loss = 0.2063
epoch = 50000, loss = 0.1152
epoch = 50500, loss = 0.0949
epoch = 51000, loss = 0.2826
epoch = 51500, loss = 0.2664
epoch = 52000, loss = 0.1016
epoch = 52500, loss = 0.0978
epoch = 53000, loss = 0.4301
epoch = 53500, loss = 0.1612
epoch = 54000, loss = 0.2415
epoch = 54500, loss = 0.2549
epoch = 55000, loss = 0.1973
epoch = 55500, loss = 0.2148
epoch = 56000, loss = 0.1397
epoch = 56500, loss = 0.1080
epoch = 57000, loss = 0.0895
epoch = 57500, loss = 0.2470
epoch = 58000, loss = 0.2112
epoch = 58500, loss = 0.0869
epoch = 59000, loss = 0.1487
epoch = 59500, loss = 0.1894
epoch = 60000, loss = 0.0865
epoch = 60500, loss = 0.1104
epoch = 61000, loss = 0.1124
epoch = 61500, loss = 0.0855
epoch = 62000, loss = 0.3597
epoch = 62500, loss = 0.1168
epoch = 63000, loss = 0.1742
epoch = 63500, loss = 0.2496
epoch = 64000, loss = 0.3107
epoch = 64500, loss = 0.1657
epoch = 65000, loss = 0.1014
epoch = 65500, loss = 0.1110
epoch = 66000, loss = 0.0860
epoch = 66500, loss = 0.0923
epoch = 67000, loss = 0.2389
epoch = 67500, loss = 0.1266
epoch = 68000, loss = 0.0731
epoch = 68500, loss = 0.1525
epoch = 69000, loss = 0.3041
epoch = 69500, loss = 0.4641
epoch = 70000, loss = 0.2295
epoch = 70500, loss = 0.0913
epoch = 71000, loss = 0.1009
epoch = 71500, loss = 0.2389
epoch = 72000, loss = 0.1117
epoch = 72500, loss = 0.2231
epoch = 73000, loss = 0.0944
epoch = 73500, loss = 0.0738
epoch = 74000, loss = 0.1754
epoch = 74500, loss = 0.1437
epoch = 75000, loss = 0.1293
epoch = 75500, loss = 0.1677
epoch = 76000, loss = 0.1198
epoch = 76500, loss = 0.2099
epoch = 77000, loss = 0.1137
epoch = 77500, loss = 0.1848
epoch = 78000, loss = 0.0956
epoch = 78500, loss = 0.4917
epoch = 79000, loss = 0.1573
epoch = 79500, loss = 0.1053
epoch = 80000, loss = 0.1048
epoch = 80500, loss = 0.0708
epoch = 81000, loss = 0.0779
epoch = 81500, loss = 0.0601
epoch = 82000, loss = 0.0723
epoch = 82500, loss = 0.2759
epoch = 83000, loss = 0.0831
epoch = 83500, loss = 0.0623
epoch = 84000, loss = 0.1234
epoch = 84500, loss = 0.0623
epoch = 85000, loss = 0.0994
epoch = 85500, loss = 0.0811
epoch = 86000, loss = 0.0704
epoch = 86500, loss = 0.1816
epoch = 87000, loss = 0.0665
epoch = 87500, loss = 0.1104
epoch = 88000, loss = 0.1998
epoch = 88500, loss = 0.1202
epoch = 89000, loss = 0.0572
epoch = 89500, loss = 0.1253
epoch = 90000, loss = 0.1028
epoch = 90500, loss = 0.0678
epoch = 91000, loss = 0.1384
epoch = 91500, loss = 0.0739
epoch = 92000, loss = 0.1431
epoch = 92500, loss = 0.0589
epoch = 93000, loss = 0.0756
epoch = 93500, loss = 0.0549
epoch = 94000, loss = 0.1323
epoch = 94500, loss = 0.0570
epoch = 95000, loss = 0.0823
epoch = 95500, loss = 0.0528
epoch = 96000, loss = 0.0519
epoch = 96500, loss = 0.0524
epoch = 97000, loss = 0.0589
epoch = 97500, loss = 0.5726
epoch = 98000, loss = 0.2076
epoch = 98500, loss = 0.1182
epoch = 99000, loss = 0.1096
epoch = 99500, loss = 0.0867
epoch = 100000, loss = 0.0695
epoch = 100500, loss = 0.0510
epoch = 101000, loss = 0.1228
epoch = 101500, loss = 0.0510
epoch = 102000, loss = 0.0498
epoch = 102500, loss = 0.2786
epoch = 103000, loss = 0.0933
epoch = 103500, loss = 0.0718
epoch = 104000, loss = 0.0579
epoch = 104500, loss = 0.0686
epoch = 105000, loss = 0.1026
epoch = 105500, loss = 0.1076
epoch = 106000, loss = 0.1288
epoch = 106500, loss = 0.0674
epoch = 107000, loss = 0.1445
epoch = 107500, loss = 0.1318
epoch = 108000, loss = 0.0550
epoch = 108500, loss = 0.0648
epoch = 109000, loss = 0.2004
epoch = 109500, loss = 0.0697
epoch = 110000, loss = 0.0459
epoch = 110500, loss = 0.1082
epoch = 111000, loss = 0.0784
epoch = 111500, loss = 0.0932
epoch = 112000, loss = 0.0774
epoch = 112500, loss = 0.0484
epoch = 113000, loss = 0.0894
epoch = 113500, loss = 0.0818
epoch = 114000, loss = 0.1826
epoch = 114500, loss = 0.1722
epoch = 115000, loss = 0.0713
epoch = 115500, loss = 0.1204
epoch = 116000, loss = 0.0536
epoch = 116500, loss = 0.0763
epoch = 117000, loss = 0.1320
epoch = 117500, loss = 0.0989
epoch = 118000, loss = 0.0478
epoch = 118500, loss = 0.1117
epoch = 119000, loss = 0.1045
epoch = 119500, loss = 0.0608
epoch = 120000, loss = 0.2088
epoch = 120500, loss = 0.1179
epoch = 121000, loss = 0.1637
epoch = 121500, loss = 0.0418
epoch = 122000, loss = 0.0534
epoch = 122500, loss = 0.0959
epoch = 123000, loss = 0.0839
epoch = 123500, loss = 0.1120
epoch = 124000, loss = 0.0473
epoch = 124500, loss = 0.0473
epoch = 125000, loss = 0.0731
epoch = 125500, loss = 0.0582
epoch = 126000, loss = 0.0504
epoch = 126500, loss = 0.0477
epoch = 127000, loss = 0.0398
epoch = 127500, loss = 0.0398
epoch = 128000, loss = 0.0480
epoch = 128500, loss = 0.0394
epoch = 129000, loss = 0.0531
epoch = 129500, loss = 0.1273
epoch = 130000, loss = 0.0388
epoch = 130500, loss = 0.0408
epoch = 131000, loss = 0.0390
epoch = 131500, loss = 0.1399
epoch = 132000, loss = 0.1961
epoch = 132500, loss = 0.0463
epoch = 133000, loss = 0.0455
epoch = 133500, loss = 0.0424
epoch = 134000, loss = 0.0438
epoch = 134500, loss = 0.0399
epoch = 135000, loss = 0.0469
epoch = 135500, loss = 0.1087
epoch = 136000, loss = 0.0378
epoch = 136500, loss = 0.0378
epoch = 137000, loss = 0.0901
epoch = 137500, loss = 0.0487
epoch = 138000, loss = 0.0523
epoch = 138500, loss = 0.0861
epoch = 139000, loss = 0.2757
epoch = 139500, loss = 0.0374
epoch = 140000, loss = 0.2074
epoch = 140500, loss = 0.0850
epoch = 141000, loss = 0.0442
epoch = 141500, loss = 0.0979
epoch = 142000, loss = 0.1008
epoch = 142500, loss = 0.1287
epoch = 143000, loss = 0.0427
epoch = 143500, loss = 0.0523
epoch = 144000, loss = 0.2427
epoch = 144500, loss = 0.0967
epoch = 145000, loss = 0.0637
epoch = 145500, loss = 0.0806
epoch = 146000, loss = 0.0702
epoch = 146500, loss = 0.0641
epoch = 147000, loss = 0.0437
epoch = 147500, loss = 0.2105
epoch = 148000, loss = 0.0370
epoch = 148500, loss = 0.1143
epoch = 149000, loss = 0.0785
epoch = 149500, loss = 0.0411
epoch = 150000, loss = 0.0987

In [99]:
model.eval()

xs = torch.from_numpy(data['xs']).unsqueeze(1)
x_var = Variable(xs.type(gpu_dtype))
y_pred = model(x_var).data.cpu().numpy().squeeze()
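
As a quick sanity check (not part of the original notebook), the final fit can be scored against the noisy targets with plain NumPy:

# sketch: mean squared error of the final prediction on the training points
final_mse = np.mean((y_pred - data['ys']) ** 2)
print('final training MSE: %.4f' % final_mse)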

In [100]:
p1 = figure(title="my prediction of my ultimate function!", tools=TOOLS)
p1.line(data['xs'], y_pred, line_color="red")
p1.line(data['xs'], data['ys'], line_color="blue")
p1.width = 1200
show(p1)



In [104]:
history_of_training_numpy = {}
# xs is still the (N, 1) tensor from the prediction cell above; flatten it for plotting
history_of_training_numpy['x'] = xs.numpy().squeeze()

# for kee in history_of_training.keys():
#     history_of_training_numpy[kee] = history_of_training[kee].data.cpu().numpy().squeeze()

# keep every 10,000th snapshot: slider position i will map to epoch i*10000
for i in range(1,16):
    history_of_training_numpy[str(i)] = history_of_training[str(i*10000)].data.cpu().numpy().squeeze()
    
master = ColumnDataSource(data=history_of_training_numpy)

In [105]:
# initialise the interactive line with the snapshot matching the slider's starting value (5 -> epoch 50,000)
source_final = ColumnDataSource(data=dict(x=history_of_training_numpy['x'],
                                          y=history_of_training_numpy['5']))

plot = figure(title="my prediction of my ultimate function over time!!", tools=TOOLS)
plot.line('x', 'y', source=source_final, line_width=3, line_alpha=0.6)
plot.width = 1200

plot.line(data['xs'], data['ys'], line_color="gray")

callback = CustomJS(args={
  'source': source_final, 
  'master' : master}, code="""
        var data = source.data;
        var mdata = master.data;
        var epoch = epoch_number.value;

        for (var e in data) delete data[e];

        data['x'] = mdata['x'];
        data['y'] = mdata[epoch.toString()];

        source.change.emit()
""")

epoch_number = Slider(start=1, end=15, value=5, step=1,
                    title="epoch_number", callback=callback)
callback.args["epoch_number"] = epoch_number

layout = column(
    plot,
    widgetbox(epoch_number),
)
show(layout)
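
Note that the callback= keyword on Slider and the widgetbox layout belong to older Bokeh releases. On a newer install the same wiring would look roughly like this (a sketch, assuming a Bokeh version where widget callbacks go through js_on_change):

epoch_number = Slider(start=1, end=15, value=5, step=1, title="epoch_number")
callback.args["epoch_number"] = epoch_number
epoch_number.js_on_change('value', callback)

layout = column(plot, epoch_number)
show(layout)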